#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
+#include <xen/gnttab.h>
#include <asm/swiotlb.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
} else {
for (i = 0; i < nents; i++ ) {
sg[i].dma_address =
- page_to_bus(sg[i].page) + sg[i].offset;
+ gnttab_dma_map_page(sg[i].page) + sg[i].offset;
sg[i].dma_length = sg[i].length;
BUG_ON(!sg[i].page);
IOMMU_BUG_ON(address_needs_mapping(
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
+ int i;
+
BUG_ON(direction == DMA_NONE);
if (swiotlb)
swiotlb_unmap_sg(hwdev, sg, nents, direction);
+ else {
+ for (i = 0; i < nents; i++ )
+ gnttab_dma_unmap_page(sg[i].dma_address);
+ }
}
EXPORT_SYMBOL(dma_unmap_sg);
dma_addr = swiotlb_map_page(
dev, page, offset, size, direction);
} else {
- dma_addr = page_to_bus(page) + offset;
+ dma_addr = gnttab_dma_map_page(page) + offset;
IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
}
BUG_ON(direction == DMA_NONE);
if (swiotlb)
swiotlb_unmap_page(dev, dma_address, size, direction);
+ else
+ gnttab_dma_unmap_page(dma_address);
}
EXPORT_SYMBOL(dma_unmap_page);
#endif /* CONFIG_HIGHMEM */
if (swiotlb) {
dma = swiotlb_map_single(dev, ptr, size, direction);
} else {
- dma = virt_to_bus(ptr);
+ dma = gnttab_dma_map_page(virt_to_page(ptr)) +
+ offset_in_page(ptr);
IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
IOMMU_BUG_ON(address_needs_mapping(dev, dma));
}
BUG();
if (swiotlb)
swiotlb_unmap_single(dev, dma_addr, size, direction);
+ else
+ gnttab_dma_unmap_page(dma_addr);
}
EXPORT_SYMBOL(dma_unmap_single);
#include <asm/pci.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
+#include <xen/gnttab.h>
#include <xen/interface/memory.h>
int swiotlb;
#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
-#define SG_ENT_PHYS_ADDRESS(sg) (page_to_bus((sg)->page) + (sg)->offset)
-
/*
* Maximum allowable number of contiguous slabs to map,
* must be a power of 2. What is the appropriate value ?
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
- dma_addr_t dev_addr = virt_to_bus(ptr);
+ dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
+ offset_in_page(ptr);
void *map;
struct phys_addr buffer;
/*
* Oh well, have to allocate and map a bounce buffer.
*/
+ gnttab_dma_unmap_page(dev_addr);
buffer.page = virt_to_page(ptr);
buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
map = map_single(hwdev, buffer, size, dir);
BUG_ON(dir == DMA_NONE);
if (in_swiotlb_aperture(dev_addr))
unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
+ else
+ gnttab_dma_unmap_page(dev_addr);
}
/*
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++) {
- dev_addr = SG_ENT_PHYS_ADDRESS(sg);
+ dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
+
if (address_needs_mapping(hwdev, dev_addr)) {
+ gnttab_dma_unmap_page(dev_addr);
buffer.page = sg->page;
buffer.offset = sg->offset;
map = map_single(hwdev, buffer, sg->length, dir);
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++)
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+ if (in_swiotlb_aperture(sg->dma_address))
unmap_single(hwdev,
(void *)bus_to_virt(sg->dma_address),
sg->dma_length, dir);
+ else
+ gnttab_dma_unmap_page(sg->dma_address);
}
/*
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++)
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+ if (in_swiotlb_aperture(sg->dma_address))
sync_single(hwdev,
(void *)bus_to_virt(sg->dma_address),
sg->dma_length, dir);
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++)
- if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
+ if (in_swiotlb_aperture(sg->dma_address))
sync_single(hwdev,
(void *)bus_to_virt(sg->dma_address),
sg->dma_length, dir);
dma_addr_t dev_addr;
char *map;
- dev_addr = page_to_bus(page) + offset;
+ dev_addr = gnttab_dma_map_page(page) + offset;
if (address_needs_mapping(hwdev, dev_addr)) {
+ gnttab_dma_unmap_page(dev_addr);
buffer.page = page;
buffer.offset = offset;
map = map_single(hwdev, buffer, size, direction);
BUG_ON(direction == DMA_NONE);
if (in_swiotlb_aperture(dma_address))
unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
+ else
+ gnttab_dma_unmap_page(dma_address);
}
#endif
return 0;
}
+/*
+ * Destructor installed via SetPageForeign() in gnttab_copy_grant_page().
+ * Runs when the last reference to the (now foreign) original page is
+ * dropped: releases the back-reference to the replacement page (stashed
+ * in page->mapping when the page was found DMA-mapped), clears the
+ * foreign state, and restores the page's refcount/mapcount so it can be
+ * freed normally.
+ */
+static void gnttab_page_free(struct page *page)
+{
+	/* NOTE(review): ->mapping is reused here as an opaque pointer to
+	 * the replacement page (see gnttab_copy_grant_page), not as a
+	 * real address_space — drop that extra reference first. */
+	if (page->mapping) {
+		put_page((struct page *)page->mapping);
+		page->mapping = NULL;
+	}
+
+	ClearPageForeign(page);
+	gnttab_reset_grant_page(page);
+	put_page(page);
+}
+
+/*
+ * Must not be called with IRQs off. This should only be used on the
+ * slow path.
+ *
+ * Copy a foreign granted page to local memory.
+ *
+ * Returns 0 on success (with *pagep updated to the new local page),
+ * -ENOENT if the page's refcount already dropped to zero, -ENOMEM if no
+ * replacement page could be allocated, or -EBUSY if the page turned out
+ * to be DMA-mapped concurrently (in which case the old page keeps a
+ * back-reference to the new one via ->mapping for later cleanup in
+ * gnttab_page_free).
+ */
+int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
+{
+	struct gnttab_unmap_and_replace unmap;
+	mmu_update_t mmu;
+	struct page *page;
+	struct page *new_page;
+	void *new_addr;
+	void *addr;
+	paddr_t pfn;
+	maddr_t mfn;
+	maddr_t new_mfn;
+	int err;
+
+	page = *pagep;
+	/* Pin the page; bail out if it is already on its way to being freed. */
+	if (!get_page_unless_zero(page))
+		return -ENOENT;
+
+	err = -ENOMEM;
+	new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
+	if (!new_page)
+		goto out;
+
+	/* Copy the granted page's contents into the local replacement. */
+	new_addr = page_address(new_page);
+	addr = page_address(page);
+	memcpy(new_addr, addr, PAGE_SIZE);
+
+	pfn = page_to_pfn(page);
+	mfn = pfn_to_mfn(pfn);
+	new_mfn = virt_to_mfn(new_addr);
+
+	/* Swap the p2m entries so the old pfn now maps the new mfn, and
+	 * tell the hypervisor about the reverse (machine-to-phys) mapping. */
+	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+		set_phys_to_machine(pfn, new_mfn);
+		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
+
+		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+		mmu.val = pfn;
+		err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
+		BUG_ON(err);
+	}
+
+	/* Atomically unmap the grant and splice the new page's mapping in
+	 * at the old virtual address. */
+	gnttab_set_replace_op(&unmap, (unsigned long)addr,
+			      (unsigned long)new_addr, ref);
+
+	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
+					&unmap, 1);
+	BUG_ON(err);
+	BUG_ON(unmap.status);
+
+	/* Transfer identity to the replacement; the old page becomes a
+	 * PageForeign page that gnttab_page_free() will reclaim. */
+	new_page->mapping = page->mapping;
+	new_page->index = page->index;
+	set_bit(PG_foreign, &new_page->flags);
+	*pagep = new_page;
+
+	SetPageForeign(page, gnttab_page_free);
+	page->mapping = NULL;
+
+	/*
+	 * Ensure that there is a barrier between setting the p2m entry
+	 * and checking the map count. See gnttab_dma_map_page.
+	 */
+	smp_mb();
+
+	/* Has the page been DMA-mapped? */
+	if (unlikely(page_mapped(page))) {
+		err = -EBUSY;
+		/* Keep the new page alive via a back-pointer until the
+		 * DMA mapping goes away (released in gnttab_page_free). */
+		page->mapping = (void *)new_page;
+	}
+
+out:
+	put_page(page);
+	return err;
+
+}
+EXPORT_SYMBOL(gnttab_copy_grant_page);
+
+/*
+ * Keep track of foreign pages marked as PageForeign so that we don't
+ * return them to the remote domain prematurely.
+ *
+ * PageForeign pages are pinned down by increasing their mapcount.
+ *
+ * All other pages are simply returned as is.
+ *
+ * Returns the machine (bus) address of the page to program into the
+ * device; for a foreign page this re-reads the p2m entry after the
+ * barrier so it observes any concurrent gnttab_copy_grant_page().
+ */
+maddr_t gnttab_dma_map_page(struct page *page)
+{
+	maddr_t mfn = pfn_to_mfn(page_to_pfn(page)), mfn2;
+
+	/* Fast path: ordinary local page — nothing to pin. */
+	if (!PageForeign(page))
+		return mfn << PAGE_SHIFT;
+
+	/* Foreign page whose mfn already resolves to a local pfn: it has
+	 * been copied/replaced already, so treat it as local. */
+	if (mfn_to_local_pfn(mfn) < max_mapnr)
+		return mfn << PAGE_SHIFT;
+
+	/* Pin by raising the mapcount (page_mapped() becomes true). */
+	atomic_set(&page->_mapcount, 0);
+
+	/* This barrier corresponds to the one in gnttab_copy_grant_page. */
+	smp_mb();
+
+	/* Has this page been copied in the mean time? */
+	mfn2 = pfn_to_mfn(page_to_pfn(page));
+
+	return mfn2 << PAGE_SHIFT;
+}
+
int gnttab_resume(void)
{
if (max_nr_grant_frames() < nr_grant_frames)
#include <asm/hypervisor.h>
#include <asm/maddr.h> /* maddr_t */
+#include <linux/mm.h>
#include <xen/interface/grant_table.h>
#include <xen/features.h>
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
unsigned long pfn);
+int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep);
+maddr_t gnttab_dma_map_page(struct page *page);
+
+/*
+ * Counterpart of gnttab_dma_map_page(). Intentionally a no-op in this
+ * configuration — NOTE(review): the map side pins foreign pages via
+ * their mapcount; presumably unpinning happens elsewhere (e.g. via
+ * gnttab_reset_grant_page / gnttab_page_free) — confirm against callers.
+ */
+static inline void gnttab_dma_unmap_page(maddr_t mfn)
+{
+}
+
+/*
+ * Restore a (formerly granted) page's refcount and mapcount to their
+ * pristine states so it can re-enter normal page lifecycle. Undoes the
+ * mapcount pinning done in gnttab_dma_map_page().
+ */
+static inline void gnttab_reset_grant_page(struct page *page)
+{
+	init_page_count(page);
+	reset_page_mapcount(page);
+}
+
int gnttab_suspend(void);
int gnttab_resume(void);